
@InProceedings{LopesSanValAlmAra:2011:TrLeHu,
               author = "Lopes, Ana Paula B. and Santos, Elerson R. da S. and Valle, 
                         Eduardo A. and Almeida, Jussara M. de and Ara{\'u}jo, Arnaldo de 
                         Albuquerque",
          affiliation = "Depart. of Computer Science - Universidade Federal de Minas Gerais 
                         (UFMG),Belo Horizonte (MG), Brazil and Depart. of Exact and Tech. 
                         Sciences - Universidade Estadual de Santa Cruz (UESC),Ilh{\'e}us, 
                         Brazil and Depart. of Computer Science - Universidade Federal de 
                         Minas Gerais (UFMG),Belo Horizonte (MG), Brazil and Universidade 
                         Estadual de Campinas (UNICAMP), Campinas (SP), Brazil and Depart. 
                         of Computer Science - Universidade Federal de Minas Gerais 
                         (UFMG),Belo Horizonte (MG), Brazil and Depart. of Computer Science 
                         - Universidade Federal de Minas Gerais (UFMG),Belo Horizonte (MG), 
                         Brazil",
                title = "Transfer Learning for Human Action Recognition",
            booktitle = "Proceedings...",
                 year = "2011",
               editor = "Lewiner, Thomas and Torres, Ricardo",
         organization = "Conference on Graphics, Patterns and Images, 24. (SIBGRAPI)",
            publisher = "IEEE Computer Society",
              address = "Los Alamitos",
             keywords = "action recognition, transfer learning, bags-of-visual-features, 
                         video understanding.",
             abstract = "To manually collect action samples from realistic videos is a 
                         time-consuming and error-prone task. This is a serious bottleneck 
                         to research related to video understanding, since the large 
                         intra-class variations of such videos demand training sets large 
                         enough to properly encompass those variations. Most authors 
                         dealing with this issue rely on (semi-) automated procedures to 
                         collect additional, generally noisy, examples. In this paper, we 
                         exploit a different approach, based on a Transfer Learning (TL) 
                         technique, to address the target task of action recognition. More 
                         specifically, we propose a framework that transfers the knowledge 
                         about concepts from a previously labeled still image database to 
                         the target action video database. It is assumed that, once 
                         identified in the target action database, these concepts provide 
                         some contextual clues to the action classifier. Our experiments 
                         with Caltech256 and Hollywood2 databases indicate: a) the 
                         feasibility of successfully using transfer learning techniques to 
                         detect concepts and, b) that it is indeed possible to enhance 
                         action recognition with the transferred knowledge of even a few 
                         concepts. In our case, only four concepts were enough to obtain 
                         statistically significant improvements for most actions.",
  conference-location = "Macei{\'o}, AL, Brazil",
      conference-year = "28-31 Aug. 2011",
                  doi = "10.1109/SIBGRAPI.2011.41",
                  url = "http://urlib.net/ibi/8JMKD3MGPBW34M/3A3LQGS",
             language = "en",
                  ibi = "8JMKD3MGPBW34M/3A3LQGS",
           targetfile = "PID1979911.pdf",
        urlaccessdate = "2024, Apr. 28"
}

